#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status.  */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
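
/*
 * Illustrative sketch (not part of the original header): a wait queue head
 * can be declared statically with DECLARE_WAIT_QUEUE_HEAD() or embedded in
 * a structure and initialised at run time with init_waitqueue_head().
 * "foo_wq" and "struct foo_device" are hypothetical names used only here.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *
 *	struct foo_device {
 *		wait_queue_head_t wq;
 *		int ready;
 *	};
 *
 *	static void foo_device_setup(struct foo_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		dev->ready = 0;
 *	}
 */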

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
					wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)					\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)			\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
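
/*
 * Illustrative sketch (not part of the original header): a poll-capable
 * driver might use a poll-key wakeup when data arrives, so that key-aware
 * wake functions can filter on the event mask.  "foo_device", "wq" and
 * "have_data" are hypothetical names.
 *
 *	static void foo_data_arrived(struct foo_device *dev)
 *	{
 *		dev->have_data = 1;
 *		wake_up_interruptible_poll(&dev->wq, POLLIN | POLLRDNORM);
 *	}
 */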

#define __wait_event(wq, condition) 					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) 					\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
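
/*
 * Illustrative sketch (not part of the original header): a consumer waiting
 * on a hypothetical "dev->ready" flag, and the matching producer.  Note the
 * producer updates the condition before calling wake_up().
 *
 *	Consumer:
 *		wait_event(dev->wq, dev->ready);
 *
 *	Producer:
 *		dev->ready = 1;
 *		wake_up(&dev->wq);
 */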

#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition)) 						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
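
/*
 * Illustrative sketch (not part of the original header): bounding the wait
 * with a timeout.  "dev" is hypothetical; HZ converts seconds to jiffies.
 *
 *	long left = wait_event_timeout(dev->wq, dev->ready, 5 * HZ);
 *	if (!left)
 *		return -ETIMEDOUT;	(condition still false after 5 seconds)
 *	(otherwise "left" is the number of jiffies that remained)
 */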

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
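
/*
 * Illustrative sketch (not part of the original header): an interruptible
 * wait that propagates -ERESTARTSYS so the syscall can be restarted after
 * signal handling.  "dev" is hypothetical.
 *
 *	int err = wait_event_interruptible(dev->wq, dev->ready);
 *	if (err)
 *		return err;	(a signal arrived before the condition)
 */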

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
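
/*
 * Illustrative sketch (not part of the original header): combining signal
 * handling with a timeout.  "dev" is hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(dev->wq, dev->ready, HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(timed out, condition still false)
 *	if (ret < 0)
 *		return ret;		(-ERESTARTSYS, interrupted by a signal)
 *	(ret > 0: condition became true with "ret" jiffies to spare)
 */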

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait, 			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})
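
/*
 * Illustrative sketch (not part of the original header): exclusive waiters
 * are queued at the tail and a normal wake_up() stops after waking one of
 * them, which avoids a thundering herd when many threads wait for the same
 * resource.  "pool" and "free_slots" are hypothetical names.
 *
 *	Waiter:
 *		if (wait_event_interruptible_exclusive(pool->wq, pool->free_slots))
 *			return -ERESTARTSYS;
 *
 *	Releaser (wakes exactly one exclusive waiter):
 *		pool->free_slots++;
 *		wake_up(&pool->wq);
 */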


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
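
/*
 * Illustrative sketch (not part of the original header): the _locked
 * variants assume the caller already holds wq.lock, so the condition can be
 * tested under the same lock that protects it.  "dev" is hypothetical.
 *
 *	Waiter:
 *		spin_lock(&dev->wq.lock);
 *		err = wait_event_interruptible_locked(dev->wq, dev->ready);
 *		if (!err)
 *			dev->ready = 0;	(consume the event under the lock)
 *		spin_unlock(&dev->wq.lock);
 *
 *	Waker (same lock, wake_up_locked()):
 *		spin_lock(&dev->wq.lock);
 *		dev->ready = 1;
 *		wake_up_locked(&dev->wq);
 *		spin_unlock(&dev->wq.lock);
 */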

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list and this
 * process is woken, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list and this
 * process is woken, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
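
/*
 * Illustrative sketch (not part of the original header): a TASK_KILLABLE
 * sleep ignores ordinary signals but can still be interrupted by a fatal
 * one, which suits long waits such as filesystem I/O.  "dev" is
 * hypothetical.
 *
 *	int err = wait_event_killable(dev->wq, dev->ready);
 *	if (err)
 *		return err;	(only a fatal signal ends the wait early)
 */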

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
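
/*
 * Illustrative sketch (not part of the original header): the open-coded
 * wait loop that the wait_event*() macros are built from, useful when extra
 * work is needed between condition checks.  "dev" is hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 *		if (dev->ready)
 *			break;
 *		if (signal_pending(current))
 *			break;	(caller decides how to report the signal)
 *		schedule();
 *	}
 *	finish_wait(&dev->wq, &wait);
 */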

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
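
/*
 * Illustrative sketch (not part of the original header): waiting for a
 * hypothetical FOO_BUSY bit in an unsigned long "dev->flags" to clear,
 * sleeping via the shared hashed waitqueue.  The @action callback normally
 * just schedules and returns 0; foo_wait_action() is an example name.
 *
 *	static int foo_wait_action(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_bit(&dev->flags, FOO_BUSY, foo_wait_action,
 *		    TASK_UNINTERRUPTIBLE);
 */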

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
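
/*
 * Illustrative sketch (not part of the original header): taking a bit lock
 * with wait_on_bit_lock() and releasing it by clearing the bit and waking
 * any waiters hashed on it.  FOO_LOCK, "dev->flags" and foo_wait_action()
 * are hypothetical names from the previous sketch.
 *
 *	Acquire (returns 0 once the bit was clear and we set it):
 *		wait_on_bit_lock(&dev->flags, FOO_LOCK, foo_wait_action,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	Release:
 *		clear_bit(FOO_LOCK, &dev->flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&dev->flags, FOO_LOCK);
 */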

#endif /* __KERNEL__ */

#endif