1 /* Wait on a condition.  Generic version.
2    Copyright (C) 2000-2022 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4 
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9 
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14 
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library;  if not, see
17    <https://www.gnu.org/licenses/>.  */
18 
19 #include <pthread.h>
20 
21 #include <pt-internal.h>
22 #include <pthreadP.h>
23 #include <time.h>
24 
25 extern int __pthread_cond_timedwait_internal (pthread_cond_t *cond,
26 					      pthread_mutex_t *mutex,
27 					      clockid_t clockid,
28 					      const struct timespec *abstime);
29 
30 int
__pthread_cond_timedwait(pthread_cond_t * cond,pthread_mutex_t * mutex,const struct timespec * abstime)31 __pthread_cond_timedwait (pthread_cond_t *cond,
32 			  pthread_mutex_t *mutex,
33 			  const struct timespec *abstime)
34 {
35   return __pthread_cond_timedwait_internal (cond, mutex, -1, abstime);
36 }
37 
38 weak_alias (__pthread_cond_timedwait, pthread_cond_timedwait);
39 
40 int
__pthread_cond_clockwait(pthread_cond_t * cond,pthread_mutex_t * mutex,clockid_t clockid,const struct timespec * abstime)41 __pthread_cond_clockwait (pthread_cond_t *cond,
42 			  pthread_mutex_t *mutex,
43 			  clockid_t clockid,
44 			  const struct timespec *abstime)
45 {
46   return __pthread_cond_timedwait_internal (cond, mutex, clockid, abstime);
47 }
48 
49 weak_alias (__pthread_cond_clockwait, pthread_cond_clockwait);
50 
/* Context handed to cancel_hook through the thread's
   cancel_hook_arg pointer: identifies which waiter to remove from
   which condition's queue if the wait is cancelled.  */
struct cancel_ctx
{
  struct __pthread *wakeup;	/* The blocked (or about-to-block) waiter.  */
  pthread_cond_t *cond;		/* The condition it is queued on.  */
};
56 
/* Cancellation hook, run on behalf of a thread being cancelled while
   it is blocked (or about to block) on a condition variable.  ARG is
   a struct cancel_ctx identifying the waiter and the condition.
   Dequeues the waiter, if still queued, and wakes it so it can act
   on the pending cancellation.  */
static void
cancel_hook (void *arg)
{
  struct cancel_ctx *ctx = arg;
  struct __pthread *wakeup = ctx->wakeup;
  pthread_cond_t *cond = ctx->cond;
  int unblock;

  __pthread_spin_wait (&cond->__lock);
  /* The thread only needs to be awaken if it's blocking or about to block.
     If it was already unblocked, it's not queued any more.  */
  unblock = wakeup->prevp != NULL;
  if (unblock)
    __pthread_dequeue (wakeup);
  __pthread_spin_unlock (&cond->__lock);

  /* Wake the thread outside the condition's spin lock; the decision
     was already made under the lock, so the dequeue and the wakeup
     stay consistent.  */
  if (unblock)
    __pthread_wakeup (wakeup);
}
76 
/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.

   CLOCKID selects the clock ABSTIME is measured against; -1 means
   "unspecified", in which case the condition attribute's clock (or
   the process default) is used.  Returns 0 on wakeup, ETIMEDOUT on
   timeout, or an error code from the parameter checks.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   clockid_t clockid,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id;

  /* Pick a provisional clock; a condition-attribute clock may
     override the default below, but never an explicit CLOCKID.  */
  if (clockid != -1)
    clock_id = clockid;
  else
    clock_id = __pthread_default_condattr.__clock;

  /* Validate ABSTIME before touching any shared state.  */
  if (abstime && ! valid_nanoseconds (abstime->tv_nsec))
    return EINVAL;

  err = __pthread_mutex_checklocked (mutex);
  if (err)
    return err;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (cancelled)
    {
      __pthread_mutex_unlock (&self->cancel_lock);
      __pthread_exit (PTHREAD_CANCELED);
    }

  self->cancel_hook = cancel_hook;
  self->cancel_hook_arg = &ctx;
  oldtype = self->cancel_type;

  if (oldtype != PTHREAD_CANCEL_DEFERRED)
    self->cancel_type = PTHREAD_CANCEL_DEFERRED;

  /* Add ourselves to the list of waiters.  This is done while setting
     the cancellation hook to simplify the cancellation procedure, i.e.
     if the thread is queued, it can be cancelled, otherwise it is
     already unblocked, progressing on the return path.  */
  __pthread_spin_wait (&cond->__lock);
  __pthread_enqueue (&cond->__queue, self);
  /* The condition's own attribute clock overrides the process
     default, but not an explicit CLOCKID argument.  */
  if (cond->__attr != NULL && clockid == -1)
    clock_id = cond->__attr->__clock;
  __pthread_spin_unlock (&cond->__lock);

  __pthread_mutex_unlock (&self->cancel_lock);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Increase the waiter reference count.  Relaxed MO is sufficient because
     we only need to synchronize when decrementing the reference count.  */
  atomic_fetch_add_relaxed (&cond->__wrefs, 2);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_wait (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
         wakeup message has been sent.  It was either consumed while we were
         blocking, or queued after we timed out and before we acquired the
         condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  Noone attempted to wake us up,
         i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
     are the last waiter (prior value of __wrefs was 1 << 1), then wake any
     threads waiting in pthread_cond_destroy.  Release MO to synchronize with
     these threads.  Don't bother clearing the wake-up request flag.  */
  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);

  /* Consume the stray wakeup message queued after our timeout, so it
     cannot spuriously wake a later block on SELF.  */
  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
214