/* Wait on a semaphore with a timeout.  Generic version.
   Copyright (C) 2005-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
18
19 #include <semaphore.h>
20 #include <errno.h>
21 #include <assert.h>
22 #include <time.h>
23 #include <hurdlock.h>
24 #include <hurd/hurd.h>
25 #include <sysdep-cancel.h>
26
27 #include <pt-internal.h>
28
#if !__HAVE_64B_ATOMICS
/* Forward declaration; defined at the bottom of this file.  Drops this
   thread's waiter registration when the semaphore uses the split
   32-bit value/nwaiters layout.  */
static void
__sem_wait_32_finish (struct new_sem *isem);
#endif
33
/* Cancellation cleanup handler installed around the sleeping wait:
   deregister the calling thread as a waiter on the semaphore ARG
   (a struct new_sem *) when the wait is abandoned.  */
static void
__sem_wait_cleanup (void *arg)
{
  struct new_sem *isem = arg;

#if __HAVE_64B_ATOMICS
  /* Value and waiter count share one 64-bit word; the waiter count
     lives above SEM_NWAITERS_SHIFT.  Subtract our contribution.  */
  atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
#else
  /* Split layout: use the common finish path, which also clears the
     waiters flag when this was the last waiter.  */
  __sem_wait_32_finish (isem);
#endif
}
45
/* Wait on SEM for a token.  If TIMEOUT is non-NULL it is an absolute
   deadline measured against CLOCK_ID.  Returns 0 on success, otherwise
   -1 with errno set (ETIMEDOUT, EINTR, or the error mapped by
   __hurd_fail).  This is a cancellation point; on cancellation the
   pushed cleanup handler deregisters us as a waiter.

   The algorithm mirrors nptl's sem_waitcommon: register as a waiter,
   then loop trying to take a token, sleeping on gsync when none is
   available.  */
int
__sem_timedwait_internal (sem_t *restrict sem,
			  clockid_t clock_id,
			  const struct timespec *restrict timeout)
{
  struct new_sem *isem = (struct new_sem *) sem;
  int err, ret = 0;
  int flags = isem->pshared ? GSYNC_SHARED : 0;

  __pthread_testcancel ();

  /* Fast path: grab a token without registering as a waiter.  */
  if (__sem_waitfast (isem, 0) == 0)
    return 0;

  int cancel_oldtype = LIBC_CANCEL_ASYNC();

#if __HAVE_64B_ATOMICS
  /* Register as a waiter by bumping the count in the upper half of the
     combined 64-bit word.  NOTE(review): this path accesses sem->data
     while the 32-bit path uses isem->...; presumably sem_t's layout
     makes both spellings equivalent — confirm against the htl
     semaphore.h definition.  */
  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
		 (uint64_t) 1 << SEM_NWAITERS_SHIFT);

  pthread_cleanup_push (__sem_wait_cleanup, isem);

  for (;;)
    {
      if ((d & SEM_VALUE_MASK) == 0)
	{
	  /* No token, sleep on the 32-bit value word inside DATA.  */
	  if (timeout)
	    err = __lll_abstimed_wait_intr (
		      ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
		      0, timeout, flags, clock_id);
	  else
	    err = __lll_wait_intr (
		      ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
		      0, flags);

	  if (err != 0)
	    {
	      /* Error, interruption or timeout, abort.  Map the Mach
		 kernel codes onto the POSIX errno values callers
		 expect, then deregister as a waiter by hand (the
		 cleanup handler only runs on cancellation).  */
	      if (err == KERN_TIMEDOUT)
		err = ETIMEDOUT;
	      if (err == KERN_INTERRUPTED)
		err = EINTR;
	      ret = __hurd_fail (err);
	      __sem_wait_cleanup (isem);
	      break;
	    }

	  /* Token changed — reload and retry.  */
	  d = atomic_load_relaxed (&sem->data);
	}
      else
	{
	  /* Try to take one token and drop our waiter registration in
	     a single CAS; D is refreshed on failure.  */
	  if (atomic_compare_exchange_weak_acquire (&sem->data,
	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
	    {
	      /* Success */
	      ret = 0;
	      break;
	    }
	}
    }

  pthread_cleanup_pop (0);
#else
  unsigned int v;

  /* Split layout: the waiter count is a separate word.  */
  atomic_fetch_add_acquire (&isem->nwaiters, 1);

  pthread_cleanup_push (__sem_wait_cleanup, isem);

  v = atomic_load_relaxed (&isem->value);
  do
    {
      do
	{
	  /* Ensure the waiters flag is set in VALUE so a poster knows
	     it must issue a wakeup.  */
	  do
	    {
	      if ((v & SEM_NWAITERS_MASK) != 0)
		break;
	    }
	  while (!atomic_compare_exchange_weak_release (&isem->value,
	      &v, v | SEM_NWAITERS_MASK));

	  if ((v >> SEM_VALUE_SHIFT) == 0)
	    {
	      /* No token, sleep — but only while VALUE still equals
		 "zero tokens + waiters flag".  */
	      if (timeout)
		err = __lll_abstimed_wait_intr (&isem->value,
			  SEM_NWAITERS_MASK, timeout, flags, clock_id);
	      else
		err = __lll_wait_intr (&isem->value,
			  SEM_NWAITERS_MASK, flags);

	      if (err != 0)
		{
		  /* Error, interruption or timeout, abort.  */
		  if (err == KERN_TIMEDOUT)
		    err = ETIMEDOUT;
		  if (err == KERN_INTERRUPTED)
		    err = EINTR;
		  ret = __hurd_fail (err);
		  goto error;
		}

	      /* Token changed — reload and retry.  */
	      v = atomic_load_relaxed (&isem->value);
	    }
	}
      while ((v >> SEM_VALUE_SHIFT) == 0);
    }
  while (!atomic_compare_exchange_weak_acquire (&isem->value,
      &v, v - (1 << SEM_VALUE_SHIFT)));

 error:
  pthread_cleanup_pop (0);

  /* Deregister as a waiter on both the success and the error path.  */
  __sem_wait_32_finish (isem);
#endif

  LIBC_CANCEL_RESET (cancel_oldtype);

  return ret;
}
171
#if !__HAVE_64B_ATOMICS
/* Stop being a registered waiter (non-64b-atomics code only).  */
static void
__sem_wait_32_finish (struct new_sem *isem)
{
  /* If we appear to be the only waiter, tentatively clear the waiters
     flag in VALUE so posters can stop issuing wakeups.  */
  unsigned int wguess = atomic_load_relaxed (&isem->nwaiters);
  if (wguess == 1)
    atomic_fetch_and_acquire (&isem->value, ~SEM_NWAITERS_MASK);

  /* Drop our registration; WFINAL is the pre-decrement count.  */
  unsigned int wfinal = atomic_fetch_add_release (&isem->nwaiters, -1);
  if (wfinal > 1 && wguess == 1)
    {
      /* We guessed wrong: other waiters registered between the load
	 and the decrement, and we may have cleared the flag they rely
	 on.  Restore it and wake one thread per available token so no
	 waiter sleeps while tokens are present.  */
      unsigned int v = atomic_fetch_or_relaxed (&isem->value,
						SEM_NWAITERS_MASK);
      v >>= SEM_VALUE_SHIFT;
      while (v--)
	__lll_wake (&isem->value, isem->pshared ? GSYNC_SHARED : 0);
    }
}
#endif
192
/* Wait on SEM with an absolute deadline TIMEOUT measured against
   CLOCKID.  Thin forwarding wrapper over the common implementation;
   return value and errno semantics are those of
   __sem_timedwait_internal.  */
int
__sem_clockwait (sem_t *sem, clockid_t clockid,
		 const struct timespec *restrict timeout)
{
  int result = __sem_timedwait_internal (sem, clockid, timeout);
  return result;
}
weak_alias (__sem_clockwait, sem_clockwait);
200
201 int
__sem_timedwait(sem_t * restrict sem,const struct timespec * restrict timeout)202 __sem_timedwait (sem_t *restrict sem, const struct timespec *restrict timeout)
203 {
204 return __sem_timedwait_internal (sem, CLOCK_REALTIME, timeout);
205 }
206
207 weak_alias (__sem_timedwait, sem_timedwait);
208